bitkeeper revision 1.1236.59.1 (4252adc5cfdr_zYEiMj6H6F9DIrq4A)
authordjm@djmnc4000.(none) <djm@djmnc4000.(none)>
Tue, 5 Apr 2005 15:24:53 +0000 (15:24 +0000)
committerdjm@djmnc4000.(none) <djm@djmnc4000.(none)>
Tue, 5 Apr 2005 15:24:53 +0000 (15:24 +0000)
privify tool
Signed-off-by: Dan Magenheimer (dan.magenheimer@hp.com)

.rootkeys
xen/arch/ia64/tools/privify/Makefile [new file with mode: 0644]
xen/arch/ia64/tools/privify/README.privify [new file with mode: 0644]
xen/arch/ia64/tools/privify/privify.c [new file with mode: 0644]
xen/arch/ia64/tools/privify/privify.h [new file with mode: 0644]
xen/arch/ia64/tools/privify/privify_elf64.c [new file with mode: 0644]

index 734ce70d770891962ed64134af6c2dc90eb9d27c..bada1777d65599389fdbd917406235ac4a4fa044 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 421098b6AUdbxR3wyn1ATcmNuTao_Q xen/arch/ia64/tools/README.xenia64
 42376c6dfyY0eq8MS2dK3BW2rFuEGg xen/arch/ia64/tools/README.xenia64linux
 421098b6rQ2BQ103qu1n1HNofbS2Og xen/arch/ia64/tools/mkbuildtree
+4252ace7eQQmDdwOqsKWdHo8JpKqnQ xen/arch/ia64/tools/privify/Makefile
+4252ace76fKAIizJRS6S84KbK6yXYw xen/arch/ia64/tools/privify/README.privify
+4252ace7uR0Th8eEXiLyafNPTDYrOg xen/arch/ia64/tools/privify/privify.c
+4252ace7H2dIMPFeFwczAVoP4yAHxA xen/arch/ia64/tools/privify/privify.h
+4252ace74lKUPFnO8PmF0Dtpk7Xkng xen/arch/ia64/tools/privify/privify_elf64.c
 41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
 421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
 41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
diff --git a/xen/arch/ia64/tools/privify/Makefile b/xen/arch/ia64/tools/privify/Makefile
new file mode 100644 (file)
index 0000000..9283c0b
--- /dev/null
@@ -0,0 +1,9 @@
+# Link the privify tool from its two objects.
+privify: privify_elf64.o privify.o
+	gcc -g privify.o privify_elf64.o -o privify
+
+
+# privify_elf64.c uses the host's stdio and <linux/elf.h> headers.
+privify_elf64.o: privify_elf64.c
+	gcc -g -D__KERNEL__ -c privify_elf64.c
+
+# Depend on privify.h (was missing) so the object is rebuilt when the
+# translation macros change.
+privify.o: privify.c privify.h
+	gcc -nostdinc -g -D__KERNEL__ -c privify.c
diff --git a/xen/arch/ia64/tools/privify/README.privify b/xen/arch/ia64/tools/privify/README.privify
new file mode 100644 (file)
index 0000000..77e3b00
--- /dev/null
@@ -0,0 +1,8 @@
+In this directory, just "make".
+
+Run the resulting program on a vmlinux that has been adjusted
+to run on Xen (see arch/ia64/tools/README.xenia64linux):
+
+       ./privify vmlinux xenlinux
+
+Use the resulting xenlinux file as domain0
diff --git a/xen/arch/ia64/tools/privify/privify.c b/xen/arch/ia64/tools/privify/privify.c
new file mode 100644 (file)
index 0000000..2b10186
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Binary translate privilege-sensitive ops to privileged
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co.
+ *      Dan Magenheimer (dan.magenheimer@hp.com)
+ *
+ */
+
+#include "privify.h"
+
+typedef unsigned long long u64;		// 64-bit scalar for raw instruction words
+typedef unsigned long long IA64_INST;	// one 41-bit instruction slot (see MASK_41)
+
+// An ia64 bundle is 128 bits: a 5-bit template plus three 41-bit slots.
+// slot1 straddles the two 64-bit words, hence the slot1a/slot1b split.
+typedef union U_IA64_BUNDLE {
+    u64 i64[2];
+    struct { u64 template:5,slot0:41,slot1a:18,slot1b:23,slot2:41; };
+    // NOTE: following doesn't work because bitfields can't cross natural
+    // size boundaries
+    //struct { u64 template:5, slot0:41, slot1:41, slot2:41; };
+} IA64_BUNDLE;
+
+// Execution-unit type of a slot, as selected by the bundle template.
+typedef enum E_IA64_SLOT_TYPE { I, M, F, B, L, ILLEGAL } IA64_SLOT_TYPE;
+
+// One view per instruction format referenced below.  The union names
+// (A5, B4, I26, M28, ...) and bit layouts follow the IA-64
+// instruction-format naming; unnamed bitfields (e.g. ":7") are bits
+// this tool never examines.
+typedef union U_INST64_A5 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, imm7b:7, r3:2, imm5c:5, imm9d:9, s:1, major:4; };
+} INST64_A5;
+
+typedef union U_INST64_B4 {
+    IA64_INST inst;
+    struct { u64 qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, wh:2, d:1, un1:1, major:4; };
+} INST64_B4;
+
+typedef union U_INST64_B8 {
+    IA64_INST inst;
+    struct { u64 qp:6, un21:21, x6:6, un4:4, major:4; };
+} INST64_B8;
+
+typedef union U_INST64_B9 {
+    IA64_INST inst;
+    struct { u64 qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
+} INST64_B9;
+
+typedef union U_INST64_I19 {
+    IA64_INST inst;
+    struct { u64 qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
+} INST64_I19;
+
+typedef union U_INST64_I26 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_I26;
+
+typedef union U_INST64_I27 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4;};
+} INST64_I27;
+
+typedef union U_INST64_I28 { // not privileged (mov from AR)
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_I28;
+
+typedef union U_INST64_M28 {
+    IA64_INST inst;
+    struct { u64 qp:6, :14, r3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M28;
+
+typedef union U_INST64_M29 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M29;
+
+typedef union U_INST64_M30 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, imm:7, ar3:7,x4:4,x2:2,x3:3,s:1,major:4;};
+} INST64_M30;
+
+typedef union U_INST64_M31 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M31;
+
+typedef union U_INST64_M32 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4;};
+} INST64_M32;
+
+typedef union U_INST64_M33 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M33;
+
+typedef union U_INST64_M35 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+	
+} INST64_M35;
+
+typedef union U_INST64_M36 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; }; 
+} INST64_M36;
+
+typedef union U_INST64_M41 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; }; 
+} INST64_M41;
+
+typedef union U_INST64_M42 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M42;
+
+typedef union U_INST64_M43 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M43;
+
+typedef union U_INST64_M44 {
+    IA64_INST inst;
+    struct { u64 qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
+} INST64_M44;
+
+typedef union U_INST64_M45 {
+    IA64_INST inst;
+    struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M45;
+
+typedef union U_INST64_M46 {
+    IA64_INST inst;
+    struct { u64 qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
+} INST64_M46;
+
+// Catch-all view: the generic member exposes the major opcode; the rest
+// are the per-format views used by privify_inst.
+typedef union U_INST64 {
+    IA64_INST inst;
+    struct { u64 :37, major:4; } generic;
+    INST64_A5 A5;	// used in build_hypercall_bundle only
+    INST64_B4 B4;	// used in build_hypercall_bundle only
+    INST64_B8 B8;	// rfi, bsw.[01]
+    INST64_B9 B9;	// break.b
+    INST64_I19 I19;	// used in build_hypercall_bundle only
+    INST64_I26 I26;	// mov register to ar (I unit)
+    INST64_I27 I27;	// mov immediate to ar (I unit)
+    INST64_I28 I28;	// mov from ar (I unit)
+    INST64_M28 M28;	// purge translation cache entry
+    INST64_M29 M29;	// mov register to ar (M unit)
+    INST64_M30 M30;	// mov immediate to ar (M unit)
+    INST64_M31 M31;	// mov from ar (M unit)
+    INST64_M32 M32;	// mov reg to cr
+    INST64_M33 M33;	// mov from cr
+    INST64_M35 M35;	// mov to psr
+    INST64_M36 M36;	// mov from psr
+    INST64_M41 M41;	// translation cache insert
+    INST64_M42 M42;	// mov to indirect reg/translation reg insert
+    INST64_M43 M43;	// mov from indirect reg
+    INST64_M44 M44;	// set/reset system mask
+    INST64_M45 M45;	// translation purge
+    INST64_M46 M46;	// translation access (tpa,tak)
+} INST64;
+
+#define MASK_41 ((u64)0x1ffffffffff)	// low 41 bits: one instruction slot
+
+long priv_verbose = 0;	// set nonzero to trace each translation via verbose()
+#define verbose(a...) do { if (priv_verbose) printf(a); } while(0)
+
+/*
+ * privify_inst
+ *
+ * Replaces privilege-sensitive instructions (and reads from write-trapping
+ * registers) with privileged/trapping instructions as follows:
+ *     mov rx=ar.cflg -> mov ar.cflg=r(x+64) [**]
+ *     mov rx=ar.ky -> mov ar.ky=r(x+64)
+ *     fc rx -> ptc r(x+64)
+ *     thash rx=ry -> tak rx=r(y+64)
+ *     ttag rx=ry -> tpa rx=r(y+64)
+ *     mov rx=cpuid[ry] -> mov r(x+64)=rr[ry]
+ *     mov rx=pmd[ry] -> mov r(x+64)=pmc[ry] [**]
+ *     cover -> break.b 0x1fffff
+ *
+ * [**] not currently implemented
+ */
+IA64_INST privify_inst(IA64_INST inst_val,
+		IA64_SLOT_TYPE slot_type, IA64_BUNDLE *bp, char **msg)
+{
+	INST64 inst = *(INST64 *)&inst_val;	// view the raw slot through the format unions
+
+	// The privify_* macros (privify.h) rewrite inst's bitfields in place.
+	// The translation encodes "privified" by adding 64 to a register
+	// number, so an op already naming r64..r127 cannot be translated;
+	// such cases only set *msg (a diagnostic string for the caller) and
+	// leave the instruction unchanged.  bp is passed for messages only.
+	*msg = 0;
+	switch (slot_type) {
+	    case M:
+		// FIXME: Also use for mov_to/from_ar.cflag (M29/M30) (IA32 only)
+		if (inst.generic.major != 1) break;	// every M-form handled below has major opcode 1
+		if (inst.M46.x3 != 0) break;
+		if (inst.M31.x6 == 0x22 && inst.M31.ar3 < 8) {
+			// mov r1=kr -> mov kr=r1+64
+			verbose("privify_inst: privified mov r1=kr @%p\n",bp);
+			if (inst.M31.r1 >= 64) *msg = "mov r1=kr w/r1>63";
+			else privify_mov_from_kr_m(inst);
+			break;
+		}
+		if (inst.M29.x6 == 0x2a && inst.M29.ar3 < 8)  {// mov kr=r1
+			// already the privileged form; only sanity-check r2
+			if (inst.M29.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
+			break;
+		}
+		if (inst.M28.x6 == 0x30) {
+			// fc r3-> ptc r3+64
+			verbose("privify_inst: privified fc r3 @%p\n",bp);
+			if (inst.M28.r3 >= 64) *msg = "fc r3 w/r3>63";
+			else privify_fc(inst);
+			break;
+		}
+		if (inst.M28.x6 == 0x34) {
+			// ptc.e: already privileged, just sanity-check r3
+			if (inst.M28.r3 >= 64) *msg = "ptc.e w/r3>63";
+			break;
+		}
+		if (inst.M46.un7 != 0) break;
+		if (inst.M46.un1 != 0) break;
+		if (inst.M46.x6 == 0x1a)  { // thash -> tak r1=r3+64
+			verbose("privify_inst: privified thash @%p\n",bp);
+			if (inst.M46.r3 >= 64) *msg = "thash w/r3>63";
+			else privify_thash(inst);
+		}
+		else if (inst.M46.x6 == 0x1b)  { // ttag -> tpa r1=r3+64
+			verbose("privify_inst: privified ttag @%p\n",bp);
+			if (inst.M46.r3 >= 64) *msg = "ttag w/r3>63";
+			else privify_ttag(inst);
+		}
+		else if (inst.M43.x6 == 0x17) {
+			verbose("privify_inst: privified mov_from_cpuid @%p\n",bp);
+			if (inst.M43.r1 >= 64) *msg = "mov_from_cpuid w/r1>63";
+			else privify_mov_from_cpuid(inst);
+		}
+		else if (inst.M46.x6 == 0x1e)  { // tpa
+			if (inst.M46.r3 >= 64) *msg = "tpa w/r3>63";
+		}
+		else if (inst.M46.x6 == 0x1f)  { // tak
+			if (inst.M46.r3 >= 64) *msg = "tak w/r3>63";
+		}
+		else if (inst.M43.x6 == 0x10) {
+			if (inst.M43.r1 >= 64) *msg = "mov_to_rr w/r1>63";
+		}
+		break;
+	    case B:
+		if (inst.generic.major != 0) break;
+		if (inst.B8.x6 == 0x2) { // cover -> break.b 0x1fffff
+			if (inst.B8.un21 != 0) break;
+			if (inst.B8.un4 != 0) break;
+			privify_cover(inst);
+			verbose("privify_inst: privified cover @%p\n",bp);
+		}
+		if (inst.B9.x6 == 0x0) { // (p15) break.b 0x1fffff -> cover
+			// only the p15-qualified form is recognized as a
+			// pseudo-cover and turned back into a real cover
+			if (inst.B9.qp != 15) break;
+			if (inst.B9.imm20 != 0xfffff) break;
+			if (inst.B9.i != 1) break;
+			inst.B8.x6 = 0x2;
+			inst.B8.un21 = 0;
+			inst.B8.un4 = 0;
+			inst.B8.qp = 0;
+			verbose("privify_inst: unprivified pseudo-cover @%p\n",
+					bp);
+		}
+		break;
+	    case I:	// only used for privifying mov_from_ar
+		// FIXME: Also use for mov_to/from_ar.cflag (I26/I27) (IA32 only)
+		if (inst.generic.major != 0) break;
+		if (inst.I28.x6 == 0x32 && !inst.I28.x3 && inst.I28.ar3 < 8) {
+			// mov r1=kr -> mov kr=r1+64
+			verbose("privify_inst: privified mov r1=kr @%p\n",bp);
+			if (inst.I28.r1 >= 64) *msg = "mov r1=kr w/r1>63";
+			else privify_mov_from_kr_i(inst);
+		}
+		else if (inst.I26.x6 == 0x2a && !inst.I26.x3 &&
+		    inst.I26.ar3 < 8)  {// mov kr=r1
+			if (inst.I26.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
+		}
+		break;
+	    case F: case L: case ILLEGAL:
+		break;
+	}
+	return *(IA64_INST *)&inst;	// hand back the (possibly rewritten) raw slot
+}
+
+#define read_slot1(b)      (((b.i64[0]>>46L) | (b.i64[1]<<18UL)) & MASK_41)
+// Not sure why, but this more obvious definition of read_slot1 doesn't work
+// because the compiler treats (b.slot1b<<18UL) as a signed 32-bit integer
+// so not enough bits get used and it gets sign extended to boot!
+//#define read_slot1(b)            ((b.slot1a | (b.slot1b<<18UL)) & MASK_41)
+#define write_slot1(b,inst) do { b.slot1a=inst;b.slot1b=inst>>18UL;} while (0)
+
+
+// Walk len bytes of memory at start as 16-byte ia64 bundles, running
+// privify_inst over the slots whose unit type (per the bundle template)
+// can hold the ops we translate.  Every bundle is written back in place.
+// The switch cases below fall through DELIBERATELY: consecutive case
+// groups handle the remaining slots of the same template family.
+void privify_memory(void *start, unsigned long len)
+{
+	IA64_BUNDLE bundle, *bp = (IA64_BUNDLE *)start;
+	IA64_INST tmp;
+	char *msg;
+
+printf("privifying %ld bytes of memory at %p\n",len,start);
+	if ((unsigned long)start & 0xfL) {
+		printf("unaligned memory block in privify_memory\n");
+	}
+	// processing continues even if start was unaligned; only the trailing
+	// partial bundle of len is dropped here
+	len &= ~0xf;
+	for (bundle = *bp; len; len -= 16) {
+	    switch(bundle.template) {
+		case 0x06: case 0x07: case 0x14: case 0x15:
+		case 0x1a: case 0x1b: case 0x1e: case 0x1f:
+			break;
+		case 0x16: case 0x17:
+			// may be B in slot0/1 but cover can only be slot2
+			bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
+			break;
+		case 0x00: case 0x01: case 0x02: case 0x03:
+			tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
+			write_slot1(bundle,tmp);
+			// fall through: slot2 (I) and slot0 (M) handled below
+		case 0x0c: case 0x0d:
+			bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
+			// fall through: slot0 (M) handled below
+		case 0x04: case 0x05:
+			// could a privified cover be in slot2 here?
+			bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
+			break;
+		case 0x08: case 0x09: case 0x0a: case 0x0b:
+			bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
+			// fall through: slot0 and slot1 (M) handled below
+		case 0x0e: case 0x0f:
+			bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
+			if (msg) break;
+			tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
+			write_slot1(bundle,tmp);
+			break;
+		case 0x10: case 0x11:
+			tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
+			write_slot1(bundle,tmp);
+			// fall through: slot0 (M) and slot2 (B) handled below
+		case 0x12: case 0x13:
+			// may be B in slot1 but cover can only be slot2
+		case 0x1c: case 0x1d:
+			bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
+			if (msg) break;
+			bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
+			break;
+		case 0x18: case 0x19:
+			bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
+			if (msg) break;
+			tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
+			write_slot1(bundle,tmp);
+			if (msg) break;
+			bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
+			break;
+	    }
+	    if (msg) {
+		// heuristic: an all-zero slot2 suggests this chunk is data,
+		// not instructions, so soften the diagnostic
+		if (bundle.slot2)
+			printf("privify_memory: %s @%p\n",msg,bp);
+		else
+			printf("privify_memory: %s @%p probably not insts\n",
+				msg,bp);
+		// NOTE(review): i64[] are u64 printed with %p — same width on
+		// LP64 so it works in practice, but technically mismatched
+		printf("privify_memory: bundle=%p,%p\n",
+			bundle.i64[1],bundle.i64[0]);
+	    }
+	    *bp = bundle;
+	    bundle = *++bp;
+	}
+
+}
diff --git a/xen/arch/ia64/tools/privify/privify.h b/xen/arch/ia64/tools/privify/privify.h
new file mode 100644 (file)
index 0000000..49291b3
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Binary translate privilege-sensitive ops to privileged
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co.
+ *      Dan Magenheimer (dan.magenheimer@hp.com)
+ *
+ */
+
+/*
+ * Macros to replace privilege-sensitive instructions (and reads from
+ * write-trapping registers) with privileged/trapping instructions as follows:
+ *	mov rx=ar.cflg -> mov ar.cflg=r(x+64) [**]
+ *	mov rx=ar.ky -> mov ar.ky=r(x+64)
+ *	fc rx -> ptc r(x+64)
+ *	thash rx=ry -> tak rx=r(y+64)
+ *	ttag rx=ry -> tpa rx=r(y+64)
+ *	mov rx=cpuid[ry] -> mov r(x+64)=rr[ry]
+ *	mov rx=pmd[ry] -> mov r(x+64)=pmc[ry] [**]
+ *	cover -> break.b 0x1fffff
+ *  [**] not implemented yet
+ */
+
+// Fixed: the old "s##\" not implemented\"" token-pasted two string
+// literals, which is not a valid preprocessor operation and would have
+// broken the build as soon as any caller was enabled; adjacent string
+// literals concatenate without ##.
+// NOTE(review): printk is a kernel symbol; if these stubs are ever used
+// from this userland tool they will need printf instead — confirm.
+#define notimpl(s) printk(s " not implemented")
+#define privify_mov_from_cflg_m(i) do { notimpl("mov from ar.cflg"); } while(0)
+#define privify_mov_from_cflg_i(i) do { notimpl("mov from ar.cflg"); } while(0)
+#define privify_mov_from_kr_m(i) do { i.M31.x6 = 0x2a; i.M29.r2 = i.M31.r1 + 64; } while(0)
+#define privify_mov_from_kr_i(i) do { i.I28.x6 = 0x2a; i.I26.r2 = i.I28.r1 + 64; } while(0)
+#define privify_fc(i) do { i.M28.x6 = 0x34; i.M28.r3 = i.M28.r3 + 64; } while(0)
+#define privify_thash(i) do { i.M46.x6 = 0x1f; i.M46.r3 += 64; } while(0)
+// Fixed: ttag must become tpa (x6 = 0x1e) per the mapping documented
+// above and in privify.c; the original set 0x1f (tak), which made a
+// privified ttag indistinguishable from a privified thash.
+#define privify_ttag(i) do { i.M46.x6 = 0x1e; i.M46.r3 += 64; } while(0)
+#define privify_mov_from_cpuid(i) do { i.M43.x6 = 0x10; i.M43.r1 += 64; } while(0)
+#define privify_mov_from_pmd(i) do { notimpl("mov from pmd"); } while(0)
+#define privify_cover(x) do { x.B8.x6 = 0x0; x.B9.imm20 = 0xfffff; x.B9.i = 0x1; } while(0)
+
diff --git a/xen/arch/ia64/tools/privify/privify_elf64.c b/xen/arch/ia64/tools/privify/privify_elf64.c
new file mode 100644 (file)
index 0000000..2fa9e49
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * Binary translate privilege-sensitive ops to privileged
+ *
+ * Copyright (C) 2004 Hewlett-Packard Co.
+ *      Dan Magenheimer (dan.magenheimer@hp.com)
+ *
+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#define ELFSIZE 64
+#include <linux/elf.h>
+
+#define MAX_FILSIZ (32*1024*1024)	// largest supported input image (32MB)
+// The whole input file is staged in this static buffer, rewritten in
+// place, then written out.
+unsigned long buf[MAX_FILSIZ/sizeof(unsigned long)];
+
+// Emit the one-line usage synopsis on the given stream.
+static void
+usage (FILE *fp)
+{
+	fputs("Usage: privify elf64filein elf64fileout\n", fp);
+}
+
+// Print a fatal diagnostic on stderr and terminate with failure status.
+static void
+panic (char *s)
+{
+	fputs("panic: ", stderr);
+	fputs(s, stderr);
+	fputc('\n', stderr);
+	exit(1);
+}
+
+/*
+ * Read the file at in_path into buf (capacity maxsize bytes).
+ * Returns the number of bytes read, or -1 on open/read error; panics if
+ * the file would overflow the buffer.  Fixes vs. original: the
+ * descriptor is now closed on every path (it leaked before), and the
+ * dead "nread < 0" test inside the nread > 0 loop became a real
+ * post-loop error check.
+ */
+static int
+read_file(const char *in_path, char *buf, int maxsize)
+{
+	ssize_t nread, totread = 0, ssize_inc = 8192;
+	int from;
+
+	if ((from = open (in_path, O_RDONLY)) < 0) return -1;
+	maxsize -= ssize_inc; // create safety zone
+	if (maxsize < 0) panic("input file exceeds max size");
+	while ((nread = read(from, buf, ssize_inc)) > 0) {
+		totread += nread;
+		if (nread < ssize_inc) break; // short read: end of file
+		buf += ssize_inc;
+		if (totread > maxsize) // buffer too small
+			panic("file exceeds max size\n");
+	}
+	close(from); // was missing: descriptor leaked on every path
+	if (nread < 0) return -1; // read error
+	return totread;
+}
+
+/*
+ * Create out_path (must not already exist: O_EXCL) and write size bytes
+ * from buf to it.  Returns 0 on success, -1 on error.  Fixes vs.
+ * original: a short write now counts as failure, and the descriptor is
+ * closed (and the close checked) instead of being leaked.
+ */
+static int
+write_file(const char *out_path, char *buf, int size)
+{
+	int to;
+
+	if ((to = open(out_path, O_WRONLY|O_CREAT|O_EXCL,0644)) < 0)
+		return -1;
+
+	if (write(to,buf,size) < size) { // short write or error
+		close(to);
+		return -1;
+	}
+	if (close(to) < 0) return -1; // close flushes; check it too
+
+	return 0;
+}
+
+// True iff ehdr begins with the 4-byte ELF magic ("\177ELF").
+#define IS_ELF(ehdr) ((ehdr).e_ident[EI_MAG0] == ELFMAG0 && \
+                      (ehdr).e_ident[EI_MAG1] == ELFMAG1 && \
+                      (ehdr).e_ident[EI_MAG2] == ELFMAG2 && \
+                      (ehdr).e_ident[EI_MAG3] == ELFMAG3)
+
+
+/*
+ * Walk the ELF program headers of the in-memory image at elfbase and
+ * privify every executable PT_LOAD segment in place.  Panics if the
+ * buffer lacks an ELF magic.  Fixes vs. original: printf specifiers now
+ * match the argument types (p_type/p_flags are 32-bit Elf64_Words, size
+ * is unsigned long), the unused shdr local is gone, and privify_memory
+ * is declared instead of being implicitly declared.
+ */
+static void
+privify_elf(char *elfbase)
+{
+	// defined in privify.c; no shared header declares it
+	extern void privify_memory(void *start, unsigned long len);
+	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfbase;
+	Elf64_Phdr *phdr;
+	char *elfaddr;
+	unsigned long size;
+	int h;
+
+	if ( !IS_ELF(*ehdr) )
+		panic("Kernel image does not have an ELF header.\n");
+	for ( h = 0; h < ehdr->e_phnum; h++ ) {
+		phdr = (Elf64_Phdr *)(elfbase +
+			ehdr->e_phoff + (h*ehdr->e_phentsize));
+		printf("h=%d, phdr=%p,phdr->p_type=%x",h,phdr,phdr->p_type);
+		if (phdr->p_type != PT_LOAD) {
+			printf("\n");
+			continue;
+		}
+		size = phdr->p_filesz;
+		elfaddr = elfbase + phdr->p_offset;
+		printf(",elfaddr=%p,size=%lu,phdr->p_flags=%x\n",
+			elfaddr,size,phdr->p_flags);
+		if (phdr->p_flags & PF_X) privify_memory(elfaddr,size);
+	}
+}
+
+/*
+ * Entry point: privify <elf64-in> <elf64-out>.
+ * Stages the whole input image in the static buffer, rewrites the
+ * privilege-sensitive instructions in its executable segments, and
+ * writes the result to the output file (which must not already exist;
+ * write_file opens with O_EXCL).
+ */
+int
+main(int argc, char **argv)
+{
+	char *in_path, *out_path;
+	int fsize;
+
+	if (argc != 3) {
+		usage(stdout);
+		exit(1);
+	}
+	in_path = argv[1];
+	out_path = argv[2];
+	if ((fsize = read_file(in_path,(char *)buf,MAX_FILSIZ)) < 0) {
+		perror("read_file");
+		panic("failed");
+	}
+	privify_elf((char *)buf);
+	fflush(stdout);
+	if (write_file(out_path,(char *)buf,fsize) < 0) {
+		perror("write_file");
+		panic("failed");
+	}
+	return 0;	// was missing: exit status was indeterminate pre-C99
+}